BLANK();
- DEFINE(XSI_PSR_IC_OFS, offsetof(vcpu_info_t, arch.interrupt_collection_enabled));
- DEFINE(XSI_PSR_IC, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.interrupt_collection_enabled)));
- DEFINE(XSI_PSR_I_OFS, offsetof(vcpu_info_t, arch.interrupt_delivery_enabled));
- DEFINE(XSI_IIP_OFS, offsetof(vcpu_info_t, arch.iip));
- DEFINE(XSI_IFA_OFS, offsetof(vcpu_info_t, arch.ifa));
- DEFINE(XSI_ITIR_OFS, offsetof(vcpu_info_t, arch.itir));
- DEFINE(XSI_IPSR, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.ipsr)));
- DEFINE(XSI_IPSR_OFS, offsetof(vcpu_info_t, arch.ipsr));
- DEFINE(XSI_IFS_OFS, offsetof(vcpu_info_t, arch.ifs));
- DEFINE(XSI_ISR_OFS, offsetof(vcpu_info_t, arch.isr));
- DEFINE(XSI_IIM_OFS, offsetof(vcpu_info_t, arch.iim));
- DEFINE(XSI_BANKNUM_OFS, offsetof(vcpu_info_t, arch.banknum));
- DEFINE(XSI_BANK0_OFS, offsetof(vcpu_info_t, arch.bank0_regs[0]));
- DEFINE(XSI_BANK1_OFS, offsetof(vcpu_info_t, arch.bank1_regs[0]));
- DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
- DEFINE(XSI_METAPHYS_OFS, offsetof(vcpu_info_t, arch.metaphysical_mode));
- DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(vcpu_info_t, arch.precover_ifs));
- DEFINE(XSI_INCOMPL_REG_OFS, offsetof(vcpu_info_t, arch.incomplete_regframe));
- DEFINE(XSI_PEND_OFS, offsetof(vcpu_info_t, arch.pending_interruption));
- DEFINE(XSI_RR0_OFS, offsetof(vcpu_info_t, arch.rrs[0]));
- DEFINE(XSI_TPR_OFS, offsetof(vcpu_info_t, arch.tpr));
- DEFINE(XSI_PTA_OFS, offsetof (vcpu_info_t, arch.pta));
- DEFINE(XSI_ITV_OFS, offsetof(vcpu_info_t, arch.itv));
+ DEFINE(XSI_PSR_IC_OFS, offsetof(mapped_regs_t, interrupt_collection_enabled));
+ DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_collection_enabled)));
+ DEFINE(XSI_PSR_I_OFS, offsetof(mapped_regs_t, interrupt_delivery_enabled));
+ DEFINE(XSI_IIP_OFS, offsetof(mapped_regs_t, iip));
+ DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
+ DEFINE(XSI_IFA_OFS, offsetof(mapped_regs_t, ifa));
+ DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
+ DEFINE(XSI_ITIR_OFS, offsetof(mapped_regs_t, itir));
+ DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
+
+ DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
+ DEFINE(XSI_IPSR_OFS, offsetof(mapped_regs_t, ipsr));
+ DEFINE(XSI_IFS_OFS, offsetof(mapped_regs_t, ifs));
+ DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
+ DEFINE(XSI_ISR_OFS, offsetof(mapped_regs_t, isr));
+ DEFINE(XSI_IIM_OFS, offsetof(mapped_regs_t, iim));
+ DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
+ DEFINE(XSI_BANK0_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
+ DEFINE(XSI_BANK1_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
+ DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+ DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
+ DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
+ DEFINE(XSI_INCOMPL_REG_OFS, offsetof(mapped_regs_t, incomplete_regframe));
+ DEFINE(XSI_PEND_OFS, offsetof(mapped_regs_t, pending_interruption));
+ DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+ DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr));
+ DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta));
+ DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv));
//DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
//DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
//DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
#define OFFSET(_sym, _str, _mem) \
DEFINE(_sym, offsetof(_str, _mem));
-#ifndef CONFIG_VTI
-#define SHARED_ARCHINFO_ADDR SHAREDINFO_ADDR
-#endif
-
void foo(void)
{
DEFINE(XSI_BASE, SHARED_ARCHINFO_ADDR);
- DEFINE(XSI_PSR_I_OFS, offsetof(arch_vcpu_info_t, interrupt_delivery_enabled));
- DEFINE(XSI_PSR_I, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, interrupt_delivery_enabled)));
- DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, ipsr)));
- DEFINE(XSI_IPSR_OFS, offsetof(arch_vcpu_info_t, ipsr));
- DEFINE(XSI_IIP_OFS, offsetof(arch_vcpu_info_t, iip));
- DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, iip)));
- DEFINE(XSI_IFS_OFS, offsetof(arch_vcpu_info_t, ifs));
- DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, ifs)));
- DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(arch_vcpu_info_t, precover_ifs));
- DEFINE(XSI_PRECOVER_IFS, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, precover_ifs)));
- DEFINE(XSI_ISR_OFS, offsetof(arch_vcpu_info_t, isr));
- DEFINE(XSI_ISR, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, isr)));
- DEFINE(XSI_IFA_OFS, offsetof(arch_vcpu_info_t, ifa));
- DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, ifa)));
- DEFINE(XSI_IIPA_OFS, offsetof(arch_vcpu_info_t, iipa));
- DEFINE(XSI_IIPA, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, iipa)));
- DEFINE(XSI_IIM_OFS, offsetof(arch_vcpu_info_t, iim));
- DEFINE(XSI_IIM, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, iim)));
- DEFINE(XSI_TPR_OFS, offsetof(arch_vcpu_info_t, tpr));
- DEFINE(XSI_TPR, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, tpr)));
- DEFINE(XSI_IHA_OFS, offsetof(arch_vcpu_info_t, iha));
- DEFINE(XSI_IHA, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, iha)));
- DEFINE(XSI_ITIR_OFS, offsetof(arch_vcpu_info_t, itir));
- DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, itir)));
- DEFINE(XSI_ITV_OFS, offsetof(arch_vcpu_info_t, itv));
- DEFINE(XSI_ITV, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, itv)));
- DEFINE(XSI_PTA_OFS, offsetof(arch_vcpu_info_t, pta));
- DEFINE(XSI_PTA, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, pta)));
- DEFINE(XSI_PSR_IC_OFS, offsetof(arch_vcpu_info_t, interrupt_collection_enabled));
- DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, interrupt_collection_enabled)));
- DEFINE(XSI_PEND_OFS, offsetof(arch_vcpu_info_t, pending_interruption));
- DEFINE(XSI_PEND, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, pending_interruption)));
- DEFINE(XSI_INCOMPL_REGFR_OFS, offsetof(arch_vcpu_info_t, incomplete_regframe));
- DEFINE(XSI_INCOMPL_REGFR, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, incomplete_regframe)));
- DEFINE(XSI_DELIV_MASK0_OFS, offsetof(arch_vcpu_info_t, delivery_mask[0]));
- DEFINE(XSI_DELIV_MASK0, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, delivery_mask[0])));
- DEFINE(XSI_METAPHYS_OFS, offsetof(arch_vcpu_info_t, metaphysical_mode));
- DEFINE(XSI_METAPHYS, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, metaphysical_mode)));
+ DEFINE(XSI_PSR_I_OFS, offsetof(mapped_regs_t, interrupt_delivery_enabled));
+ DEFINE(XSI_PSR_I, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_delivery_enabled)));
+ DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
+ DEFINE(XSI_IPSR_OFS, offsetof(mapped_regs_t, ipsr));
+ DEFINE(XSI_IIP_OFS, offsetof(mapped_regs_t, iip));
+ DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
+ DEFINE(XSI_IFS_OFS, offsetof(mapped_regs_t, ifs));
+ DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
+ DEFINE(XSI_PRECOVER_IFS_OFS, offsetof(mapped_regs_t, precover_ifs));
+ DEFINE(XSI_PRECOVER_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, precover_ifs)));
+ DEFINE(XSI_ISR_OFS, offsetof(mapped_regs_t, isr));
+ DEFINE(XSI_ISR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, isr)));
+ DEFINE(XSI_IFA_OFS, offsetof(mapped_regs_t, ifa));
+ DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
+ DEFINE(XSI_IIPA_OFS, offsetof(mapped_regs_t, iipa));
+ DEFINE(XSI_IIPA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iipa)));
+ DEFINE(XSI_IIM_OFS, offsetof(mapped_regs_t, iim));
+ DEFINE(XSI_IIM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iim)));
+ DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr));
+ DEFINE(XSI_TPR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tpr)));
+ DEFINE(XSI_IHA_OFS, offsetof(mapped_regs_t, iha));
+ DEFINE(XSI_IHA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iha)));
+ DEFINE(XSI_ITIR_OFS, offsetof(mapped_regs_t, itir));
+ DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
+ DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv));
+ DEFINE(XSI_ITV, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itv)));
+ DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta));
+ DEFINE(XSI_PTA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pta)));
+ DEFINE(XSI_PSR_IC_OFS, offsetof(mapped_regs_t, interrupt_collection_enabled));
+ DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_collection_enabled)));
+ DEFINE(XSI_PEND_OFS, offsetof(mapped_regs_t, pending_interruption));
+ DEFINE(XSI_PEND, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pending_interruption)));
+ DEFINE(XSI_INCOMPL_REGFR_OFS, offsetof(mapped_regs_t, incomplete_regframe));
+ DEFINE(XSI_INCOMPL_REGFR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, incomplete_regframe)));
+ DEFINE(XSI_DELIV_MASK0_OFS, offsetof(mapped_regs_t, delivery_mask[0]));
+ DEFINE(XSI_DELIV_MASK0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, delivery_mask[0])));
+ DEFINE(XSI_METAPHYS_OFS, offsetof(mapped_regs_t, metaphysical_mode));
+ DEFINE(XSI_METAPHYS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, metaphysical_mode)));
- DEFINE(XSI_BANKNUM_OFS, offsetof(arch_vcpu_info_t, banknum));
- DEFINE(XSI_BANKNUM, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, banknum)));
+ DEFINE(XSI_BANKNUM_OFS, offsetof(mapped_regs_t, banknum));
+ DEFINE(XSI_BANKNUM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, banknum)));
- DEFINE(XSI_BANK0_R16_OFS, offsetof(arch_vcpu_info_t, bank0_regs[0]));
- DEFINE(XSI_BANK0_R16, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, bank0_regs[0])));
- DEFINE(XSI_BANK1_R16_OFS, offsetof(arch_vcpu_info_t, bank1_regs[0]));
- DEFINE(XSI_BANK1_R16, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, bank1_regs[0])));
- DEFINE(XSI_RR0_OFS, offsetof(arch_vcpu_info_t, rrs[0]));
- DEFINE(XSI_RR0, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, rrs[0])));
- DEFINE(XSI_KR0_OFS, offsetof(arch_vcpu_info_t, krs[0]));
- DEFINE(XSI_KR0, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, krs[0])));
- DEFINE(XSI_PKR0_OFS, offsetof(arch_vcpu_info_t, pkrs[0]));
- DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, pkrs[0])));
- DEFINE(XSI_TMP0_OFS, offsetof(arch_vcpu_info_t, tmp[0]));
- DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(arch_vcpu_info_t, tmp[0])));
+ DEFINE(XSI_BANK0_R16_OFS, offsetof(mapped_regs_t, bank0_regs[0]));
+ DEFINE(XSI_BANK0_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank0_regs[0])));
+ DEFINE(XSI_BANK1_R16_OFS, offsetof(mapped_regs_t, bank1_regs[0]));
+ DEFINE(XSI_BANK1_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank1_regs[0])));
+ DEFINE(XSI_RR0_OFS, offsetof(mapped_regs_t, rrs[0]));
+ DEFINE(XSI_RR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, rrs[0])));
+ DEFINE(XSI_KR0_OFS, offsetof(mapped_regs_t, krs[0]));
+ DEFINE(XSI_KR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, krs[0])));
+ DEFINE(XSI_PKR0_OFS, offsetof(mapped_regs_t, pkrs[0]));
+ DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pkrs[0])));
+ DEFINE(XSI_TMP0_OFS, offsetof(mapped_regs_t, tmp[0]));
+ DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tmp[0])));
}
*/
memset(d->shared_info, 0, PAGE_SIZE);
+ d->shared_info->vcpu_data[v->vcpu_id].arch.privregs =
+ alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+ printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
+ memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
/* Mask all events, and specific port will be unmasked
* when customer subscribes to it.
/* FIXME: This is identity mapped address for xenheap.
* Do we need it at all?
*/
- d->xen_vastart = 0xf000000000000000;
- d->xen_vaend = 0xf300000000000000;
+ d->xen_vastart = XEN_START_ADDR;
+ d->xen_vaend = XEN_END_ADDR;
d->arch.breakimm = 0x1000;
}
#else // CONFIG_VTI
while (1);
}
memset(d->shared_info, 0, PAGE_SIZE);
+ d->shared_info->vcpu_data[0].arch.privregs =
+ alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+ printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[0].arch.privregs);
+ memset(d->shared_info->vcpu_data[0].arch.privregs, 0, PAGE_SIZE);
v->vcpu_info = &(d->shared_info->vcpu_data[0]);
- d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
+ d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
BUG();
- v->vcpu_info->arch.metaphysical_mode = 1;
+ VCPU(v, metaphysical_mode) = 1;
v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
#define DOMAIN_RID_BITS_DEFAULT 18
v->arch.starting_rid = d->arch.starting_rid;
v->arch.ending_rid = d->arch.ending_rid;
// the following will eventually need to be negotiated dynamically
- d->xen_vastart = 0xf000000000000000;
- d->xen_vaend = 0xf300000000000000;
- d->shared_info_va = 0xf100000000000000;
+ d->xen_vastart = XEN_START_ADDR;
+ d->xen_vaend = XEN_END_ADDR;
+ d->shared_info_va = SHAREDINFO_ADDR;
d->arch.breakimm = 0x1000;
v->arch.breakimm = d->arch.breakimm;
printf("arch_getdomaininfo_ctxt\n");
c->regs = *regs;
- c->vcpu = v->vcpu_info->arch;
+ c->vcpu.evtchn_vector = v->vcpu_info->arch.evtchn_vector;
+#if 0
+ if (c->vcpu.privregs && copy_to_user(c->vcpu.privregs,
+ v->vcpu_info->arch.privregs, sizeof(mapped_regs_t))) {
+ printk("Bad ctxt address: 0x%lx\n", c->vcpu.privregs);
+ return -EFAULT;
+ }
+#endif
+
c->shared = v->domain->shared_info->arch;
}
regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
regs->ar_rsc |= (2 << 2); /* force PL2/3 */
- v->vcpu_info->arch = c->vcpu;
+ v->vcpu_info->arch.evtchn_vector = c->vcpu.evtchn_vector;
+ if ( c->vcpu.privregs && copy_from_user(v->vcpu_info->arch.privregs,
+ c->vcpu.privregs, sizeof(mapped_regs_t))) {
+ printk("Bad ctxt address in arch_set_info_guest: 0x%lx\n", c->vcpu.privregs);
+ return -EFAULT;
+ }
+
init_all_rr(v);
// this should be in userspace
new_thread(v, c->guest_iip, 0, 0);
- d->xen_vastart = 0xf000000000000000;
- d->xen_vaend = 0xf300000000000000;
+ d->xen_vastart = XEN_START_ADDR;
+ d->xen_vaend = XEN_END_ADDR;
d->arch.breakimm = 0x1000 + d->domain_id;
v->arch._thread.on_ustack = 0;
void arch_do_boot_vcpu(struct vcpu *v)
{
+ struct domain *d = v->domain;
printf("arch_do_boot_vcpu: not implemented\n");
+
+ d->shared_info->vcpu_data[v->vcpu_id].arch.privregs =
+ alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
+ printf("arch_vcpu_info=%p\n", d->shared_info->vcpu_data[v->vcpu_id].arch.privregs);
+ memset(d->shared_info->vcpu_data[v->vcpu_id].arch.privregs, 0, PAGE_SIZE);
return;
}
#ifdef CONFIG_SMP
#error "sharedinfo doesn't handle smp yet"
#endif
- regs->r31 = &((shared_info_t *)SHAREDINFO_ADDR)->vcpu_data[0].arch;
+ regs->r31 = &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
PSCB(v,interrupt_delivery_enabled) = 0;
PSCB(v,interrupt_collection_enabled) = 0;
#include <asm/regionreg.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
+extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info);
#define IA64_MIN_IMPL_RID_BITS (IA64_MIN_IMPL_RID_MSB+1)
newrrv.rid = newrid;
newrrv.ve = VHPT_ENABLED_REGION_7;
newrrv.ps = IA64_GRANULE_SHIFT;
- ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
+ ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+ v->vcpu_info->arch.privregs);
}
else {
newrrv.rid = newrid;
newrrv.ve = 1; // VHPT now enabled for region 7!!
newrrv.ps = PAGE_SHIFT;
if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
- if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
+ if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
+ v->vcpu_info->arch.privregs);
else set_rr(rr,newrrv.rrval);
#endif
return 1;
//if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
//if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
if (ifs & regs->cr_ifs & 0x8000000000000000L) {
-#define SI_OFS(x) ((char *)(&PSCB(vcpu,x)) - (char *)(vcpu->vcpu_info))
-if (SI_OFS(iip)!=0x10 || SI_OFS(ipsr)!=0x08 || SI_OFS(ifs)!=0x18) {
-printf("SI_CR_IIP/IPSR/IFS_OFFSET CHANGED, SEE dorfirfi\n");
-printf("SI_CR_IIP=0x%x,IPSR=0x%x,IFS_OFFSET=0x%x\n",SI_OFS(iip),SI_OFS(ipsr),SI_OFS(ifs));
-while(1);
-}
// TODO: validate PSCB(vcpu,iip)
// TODO: PSCB(vcpu,ipsr) = psr;
PSCB(vcpu,ipsr) = psr.i64;
// FIXME? Note that this turns off the DB bit (debug)
#define PSR_BITS_TO_SET IA64_PSR_BN
+//extern void ia64_new_rr7(unsigned long rid,void *shared_info, void *shared_arch_info);
GLOBAL_ENTRY(ia64_new_rr7)
// not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 2, 7, 0, 0
+ alloc loc1 = ar.pfs, 3, 8, 0, 0
1: {
mov r28 = in0 // copy procedure index
mov r8 = ip // save ip to compute branch
;;
tpa loc5=loc5 // grab this BEFORE changing rr7
;;
+ mov loc7=in2 // arch_vcpu_info_t
+ ;;
+ tpa loc7=loc7 // grab this BEFORE changing rr7
+ ;;
mov loc3 = psr // save psr
adds r8 = 1f-1b,r8 // calculate return address for call
;;
;;
itr.d dtr[r25]=r23 // wire in new mapping...
;;
+ // Map for arch_vcpu_info_t
+ movl r22=SHARED_ARCHINFO_ADDR
+ ;;
+ movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
+ ;;
+ mov r21=loc7 // saved sharedinfo physical address
+ ;;
+ or r23=r25,r21 // construct PA | page properties
+ mov r24=PAGE_SHIFT<<2
+ ;;
+ ptr.d r22,r24
+ ;;
+ mov cr.itir=r24
+ mov cr.ifa=r22
+ ;;
+ mov r25=IA64_TR_ARCH_INFO
+ ;;
+ itr.d dtr[r25]=r23 // wire in new mapping...
+ ;;
// done, switch back to virtual and return
mov r16=loc3 // r16= original psr
END(__get_domain_bundle)
GLOBAL_ENTRY(dorfirfi)
-#define SI_CR_IIP_OFFSET 0x10
-#define SI_CR_IPSR_OFFSET 0x08
-#define SI_CR_IFS_OFFSET 0x18
- movl r16 = SHAREDINFO_ADDR+SI_CR_IIP_OFFSET
- movl r17 = SHAREDINFO_ADDR+SI_CR_IPSR_OFFSET
- movl r18 = SHAREDINFO_ADDR+SI_CR_IFS_OFFSET
+ movl r16 = XSI_IIP
+ movl r17 = XSI_IPSR
+ movl r18 = XSI_IFS
;;
ld8 r16 = [r16]
ld8 r17 = [r17]
#ifndef __ASM_EVENT_H__
#define __ASM_EVENT_H__
+#include <public/arch-ia64.h>
#include <asm/vcpu.h>
static inline void evtchn_notify(struct vcpu *v)
{
- vcpu_pend_interrupt(v, VCPU(v,evtchn_vector));
+ vcpu_pend_interrupt(v, v->vcpu_info->arch.evtchn_vector);
}
#endif
typedef struct pt_regs REGS;
-#define VCPU(_v,_x) _v->vcpu_info->arch._x
+#define VCPU(_v,_x) _v->vcpu_info->arch.privregs->_x
#define PRIVOP_ADDR_COUNT
#ifdef PRIVOP_ADDR_COUNT
#define VPD_SHIFT 17 /* 128K requirement */
#define VPD_SIZE (1 << VPD_SHIFT)
-typedef union {
- unsigned long value;
- struct {
- int a_int:1;
- int a_from_int_cr:1;
- int a_to_int_cr:1;
- int a_from_psr:1;
- int a_from_cpuid:1;
- int a_cover:1;
- int a_bsw:1;
- long reserved:57;
- };
-} vac_t;
-
-typedef union {
- unsigned long value;
- struct {
- int d_vmsw:1;
- int d_extint:1;
- int d_ibr_dbr:1;
- int d_pmc:1;
- int d_to_pmd:1;
- int d_itm:1;
- long reserved:58;
- };
-} vdc_t;
typedef struct {
unsigned long dcr; // CR0
unsigned long rsv6[46];
} cr_t;
-typedef struct vpd {
- vac_t vac;
- vdc_t vdc;
- unsigned long virt_env_vaddr;
- unsigned long reserved1[29];
- unsigned long vhpi;
- unsigned long reserved2[95];
- unsigned long vgr[16];
- unsigned long vbgr[16];
- unsigned long vnat;
- unsigned long vbnat;
- unsigned long vcpuid[5];
- unsigned long reserved3[11];
- unsigned long vpsr;
- unsigned long vpr;
- unsigned long reserved4[76];
- unsigned long vcr[128];
- unsigned long reserved5[128];
- unsigned long reserved6[3456];
- unsigned long vmm_avail[128];
- unsigned long reserved7[4096];
-} vpd_t;
-
void vmx_enter_scheduler(void);
//FIXME: Map for LID to vcpu, Eddie
#define XEN_RR7_SWITCH_STUB 0xb700000000000000
#endif // CONFIG_VTI
+#define XEN_START_ADDR 0xf000000000000000
#define KERNEL_START 0xf000000004000000
#define PERCPU_ADDR 0xf100000000000000-PERCPU_PAGE_SIZE
#define SHAREDINFO_ADDR 0xf100000000000000
#define VHPT_ADDR 0xf200000000000000
+#define SHARED_ARCHINFO_ADDR 0xf300000000000000
+#define XEN_END_ADDR 0xf400000000000000
#ifndef __ASSEMBLY__
struct pt_fpreg f11; /* scratch */
};
+/*
+ * VAC — Virtualization Acceleration Control word (VPD header field).
+ * Each 1-bit a_* flag advertises that the corresponding privileged
+ * operation (interrupt-CR access, psr/cpuid reads, cover, bsw, ...)
+ * may be handled without a full trap.
+ * NOTE(review): flags are 1-bit *signed* int bit-fields, so a set bit
+ * reads back as -1; confirm all readers test zero/non-zero only.
+ */
+typedef union {
+	unsigned long value;
+	struct {
+		int a_int:1;
+		int a_from_int_cr:1;
+		int a_to_int_cr:1;
+		int a_from_psr:1;
+		int a_from_cpuid:1;
+		int a_cover:1;
+		int a_bsw:1;
+		long reserved:57;
+	};
+} vac_t;
+
+/*
+ * VDC — Virtualization Disable Control word (VPD header field).
+ * The d_* bits presumably disable individual virtualization features
+ * (vmsw, extint, ibr/dbr, pmc, pmd writes, itm) — confirm against the
+ * VT-i / VPD specification.
+ * NOTE(review): same signed 1-bit bit-field caveat as vac_t — test
+ * flags as zero/non-zero only.
+ */
+typedef union {
+	unsigned long value;
+	struct {
+		int d_vmsw:1;
+		int d_extint:1;
+		int d_ibr_dbr:1;
+		int d_pmc:1;
+		int d_to_pmd:1;
+		int d_itm:1;
+		long reserved:58;
+	};
+} vdc_t;
+
+/*
+ * mapped_regs_t — privileged register state shared between Xen and the
+ * guest (the "privregs" area, mapped into the guest at
+ * SHARED_ARCHINFO_ADDR).  The layout follows the VT-i VPD so the same
+ * structure serves both para-virtual and CONFIG_VTI vcpus; para-virtual
+ * only fields are carved out of VPD reserved areas via the anonymous
+ * unions below, which is why the old flat member list is deleted here.
+ */
typedef struct {
-	unsigned long ipsr;
-	unsigned long iip;
-	unsigned long ifs;
-	unsigned long precover_ifs;
-	unsigned long isr;
-	unsigned long ifa;
-	unsigned long iipa;
-	unsigned long iim;
-	unsigned long unat;  // not sure if this is needed until NaT arch is done
-	unsigned long tpr;
-	unsigned long iha;
-	unsigned long itir;
-	unsigned long itv;
-	unsigned long pmv;
-	unsigned long cmcv;
-	unsigned long pta;
-	int interrupt_collection_enabled; // virtual psr.ic
-	int interrupt_delivery_enabled; // virtual psr.i
-	int pending_interruption;
-	int incomplete_regframe;	// see SDM vol2 6.8
-	unsigned long delivery_mask[4];
-	int metaphysical_mode;	// 1 = use metaphys mapping, 0 = use virtual
-	int banknum;	// 0 or 1, which virtual register bank is active
-	unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
-	unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
-	unsigned long rrs[8];	// region registers
-	unsigned long krs[8];	// kernel registers
-	unsigned long pkrs[8];	// protection key registers
-	unsigned long tmp[8];	// temp registers (e.g. for hyperprivops)
+	vac_t vac;
+	vdc_t vdc;
+	unsigned long virt_env_vaddr;
+	unsigned long reserved1[29];
+	unsigned long vhpi;
+	unsigned long reserved2[95];
+	/* VT-i vgr/vbgr overlay the para-virtual bank register save areas. */
+	union {
+	    unsigned long vgr[16];
+	    unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
+	};
+	union {
+	    unsigned long vbgr[16];
+	    unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
+	};
+	unsigned long vnat;
+	unsigned long vbnat;
+	unsigned long vcpuid[5];
+	unsigned long reserved3[11];
+	unsigned long vpsr;
+	unsigned long vpr;
+	unsigned long reserved4[76];
+	/* vcr[] overlays the named virtual control registers CR0..CR81. */
+	union {
+	    unsigned long vcr[128];
+	    struct {
+		unsigned long dcr; // CR0
+		unsigned long itm;
+		unsigned long iva;
+		unsigned long rsv1[5];
+		unsigned long pta; // CR8
+		unsigned long rsv2[7];
+		unsigned long ipsr; // CR16
+		unsigned long isr;
+		unsigned long rsv3;
+		unsigned long iip;
+		unsigned long ifa;
+		unsigned long itir;
+		unsigned long iipa;
+		unsigned long ifs;
+		unsigned long iim; // CR24
+		unsigned long iha;
+		unsigned long rsv4[38];
+		unsigned long lid; // CR64
+		unsigned long ivr;
+		unsigned long tpr;
+		unsigned long eoi;
+		unsigned long irr[4];
+		unsigned long itv; // CR72
+		unsigned long pmv;
+		unsigned long cmcv;
+		unsigned long rsv5[5];
+		unsigned long lrr0; // CR80
+		unsigned long lrr1;
+		unsigned long rsv6[46];
+	    };
+	};
+	/* Para-virtual per-vcpu state folded into VPD reserved space. */
+	union {
+	    unsigned long reserved5[128];
+	    struct {
+		unsigned long precover_ifs;
+		unsigned long unat;  // not sure if this is needed until NaT arch is done
+		int interrupt_collection_enabled; // virtual psr.ic
+		int interrupt_delivery_enabled; // virtual psr.i
+		int pending_interruption;
+		int incomplete_regframe;	// see SDM vol2 6.8
+		unsigned long delivery_mask[4];
+		int metaphysical_mode;	// 1 = use metaphys mapping, 0 = use virtual
+		int banknum;	// 0 or 1, which virtual register bank is active
+		unsigned long rrs[8];	// region registers
+		unsigned long krs[8];	// kernel registers
+		unsigned long pkrs[8];	// protection key registers
+		unsigned long tmp[8];	// temp registers (e.g. for hyperprivops)
+	    };
+	};
+	/*
+	 * NOTE(review): with CONFIG_VTI the arrays below push
+	 * sizeof(mapped_regs_t) well past PAGE_SIZE, yet the visible
+	 * privregs allocation sites memset() only PAGE_SIZE — verify
+	 * every allocation/mapping site covers the full size.
+	 */
+#ifdef CONFIG_VTI
+	unsigned long reserved6[3456];
+	unsigned long vmm_avail[128];
+	unsigned long reserved7[4096];
+#endif
+} mapped_regs_t;
+
+/*
+ * Per-vcpu arch-specific info embedded in shared_info.  privregs points
+ * at the separately allocated mapped_regs_t (see the privregs allocation
+ * sites and the SHARED_ARCHINFO_ADDR wiring in ia64_new_rr7); only
+ * evtchn_vector remains inline in the shared page.
+ */
+typedef struct {
+	mapped_regs_t *privregs;
 	int evtchn_vector;
 } arch_vcpu_info_t;
+
+/* NOTE(review): vpd_t alias presumably kept for VT-i code that still
+ * refers to the VPD — confirm no caller expects the old vpd_t layout. */
+typedef arch_vcpu_info_t vpd_t;
+
#define __ARCH_HAS_VCPU_INFO
typedef struct {